This workbook was created using the ‘dataexpks’ template:
https://github.com/DublinLearningGroup/dataexpks
This workbook performs the basic data exploration of the dataset.
# Tuning constants for the data exploration.
dataexp_level_exclusion_threshold <- 100  # categoricals with at least this many levels are flagged for exclusion
dataexp_cat_level_count           <- 40   # maximum levels shown before lumping the rest into "Other"
dataexp_hist_bins_count           <- 50   # bin count used by every histogram in the workbook
First we load the dataset.
### _TEMPLATE_
### Data is loaded into dataset rawdata_tbl here
### We may wish to set column types
# Column specification for the CSV read. Currently empty, so readr guesses
# every column type; uncomment and edit entries below to pin specific columns.
data_col_types <- cols(
  # VAR1 = col_character()
  # ,VAR2 = col_date()
  # ,VAR3 = col_number()
)

# Read the raw policy data into rawdata_tbl.
rawdata_tbl <- read_csv(
  "data/lifebook_data.csv",
  locale    = locale(),
  col_types = data_col_types,
  progress  = FALSE
)

glimpse(rawdata_tbl)
## Observations: 1,500,000
## Variables: 25
## $ policy_id <chr> "C010000046", "C010000213", "C010000371", "C010000425...
## $ countyname <chr> "Longford County", "Louth County", "Cork County", "Ki...
## $ edname <chr> "Longford No. 1 Urban", "St. Peter's", "Clonakilty Ur...
## $ nuts3name <chr> "Midland", "Border", "South-West (IE)", "Mid-East", "...
## $ sa_id <chr> "A137043001", "A147035015", "A047096018", "A087050007...
## $ cluster_id <chr> "n6_c1", "n6_c5", "n6_c3", "n6_c5", "n6_c4", "n6_c2",...
## $ prod_type <chr> "pension", "savings", "protection", "protection", "sa...
## $ prem_type <chr> "RP", "RP", "RP", "RP", "RP", "RP", "RP", "RP", "RP",...
## $ prem_freq <int> 12, 12, 12, 1, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12...
## $ prem_ape <dbl> 3116.77, 984.82, 411.23, 718.15, 862.22, 2536.67, 520...
## $ prem_risk <dbl> NA, NA, 293.735, 512.961, NA, 1811.909, 3716.786, 732...
## $ policy_startdate <date> 1990-01-02, 1990-01-02, 1990-01-04, 1990-01-05, 1990...
## $ policy_enddate <date> 2084-11-23, 2063-05-08, 1995-01-04, 2010-01-05, 2010...
## $ policy_duration <int> NA, NA, 5, 20, 20, 20, 15, 15, 20, 10, 20, 20, 20, NA...
## $ mort_rating <int> NA, NA, 100, 100, NA, 200, 200, 150, 200, 200, 150, 2...
## $ sum_assured <dbl> NA, NA, 200000, 100000, NA, 400000, 500000, 100000, 5...
## $ dob_life1 <date> 1964-11-23, 1943-05-08, 1951-03-11, 1949-03-18, 1937...
## $ gender_life1 <chr> "M", "F", "F", "M", "M", "F", "M", "F", "F", "M", "M"...
## $ smoker_life1 <chr> "N", "S", "N", "N", "N", "S", "S", "Q", "S", "S", "Q"...
## $ isjointlife <lgl> NA, NA, FALSE, FALSE, NA, FALSE, TRUE, FALSE, FALSE, ...
## $ islifeonly <lgl> NA, NA, TRUE, FALSE, NA, FALSE, TRUE, TRUE, TRUE, TRU...
## $ mortgage_status <chr> NA, NA, "TERM", "MORTDECR", NA, "MORTDECR", "TERM", "...
## $ policy_status <chr> "lapsed", "lapsed", "completed", "lapsed", "lapsed", ...
## $ policy_statuschangedate <date> 1999-01-02, 1998-12-02, 1995-01-04, 1992-07-05, 1997...
## $ lapsed <lgl> TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE...
### _TEMPLATE_
### Do simple datatype transforms and save output in data_tbl
# Copy the raw data and standardise its column names.
data_tbl <- rawdata_tbl
names(data_tbl) <- clean_names(names(rawdata_tbl))

glimpse(data_tbl)
## Observations: 1,500,000
## Variables: 25
## $ policy_id <chr> "C010000046", "C010000213", "C010000371", "C010000425...
## $ countyname <chr> "Longford County", "Louth County", "Cork County", "Ki...
## $ edname <chr> "Longford No. 1 Urban", "St. Peter's", "Clonakilty Ur...
## $ nuts3name <chr> "Midland", "Border", "South-West (IE)", "Mid-East", "...
## $ sa_id <chr> "A137043001", "A147035015", "A047096018", "A087050007...
## $ cluster_id <chr> "n6_c1", "n6_c5", "n6_c3", "n6_c5", "n6_c4", "n6_c2",...
## $ prod_type <chr> "pension", "savings", "protection", "protection", "sa...
## $ prem_type <chr> "RP", "RP", "RP", "RP", "RP", "RP", "RP", "RP", "RP",...
## $ prem_freq <int> 12, 12, 12, 1, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12...
## $ prem_ape <dbl> 3116.77, 984.82, 411.23, 718.15, 862.22, 2536.67, 520...
## $ prem_risk <dbl> NA, NA, 293.735, 512.961, NA, 1811.909, 3716.786, 732...
## $ policy_startdate <date> 1990-01-02, 1990-01-02, 1990-01-04, 1990-01-05, 1990...
## $ policy_enddate <date> 2084-11-23, 2063-05-08, 1995-01-04, 2010-01-05, 2010...
## $ policy_duration <int> NA, NA, 5, 20, 20, 20, 15, 15, 20, 10, 20, 20, 20, NA...
## $ mort_rating <int> NA, NA, 100, 100, NA, 200, 200, 150, 200, 200, 150, 2...
## $ sum_assured <dbl> NA, NA, 200000, 100000, NA, 400000, 500000, 100000, 5...
## $ dob_life1 <date> 1964-11-23, 1943-05-08, 1951-03-11, 1949-03-18, 1937...
## $ gender_life1 <chr> "M", "F", "F", "M", "M", "F", "M", "F", "F", "M", "M"...
## $ smoker_life1 <chr> "N", "S", "N", "N", "N", "S", "S", "Q", "S", "S", "Q"...
## $ isjointlife <lgl> NA, NA, FALSE, FALSE, NA, FALSE, TRUE, FALSE, FALSE, ...
## $ islifeonly <lgl> NA, NA, TRUE, FALSE, NA, FALSE, TRUE, TRUE, TRUE, TRU...
## $ mortgage_status <chr> NA, NA, "TERM", "MORTDECR", NA, "MORTDECR", "TERM", "...
## $ policy_status <chr> "lapsed", "lapsed", "completed", "lapsed", "lapsed", ...
## $ policy_statuschangedate <date> 1999-01-02, 1998-12-02, 1995-01-04, 1992-07-05, 1997...
## $ lapsed <lgl> TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE...
We now create derived features useful for modelling. These values are new variables calculated from existing variables in the data.
# data_tbl <- data_tbl %>%
# mutate()
Before we do anything with the data, we first check for missing values in the dataset. In some cases, missing data is coded by a special character rather than as a blank, so we first correct for this.
### _TEMPLATE_
### ADD CODE TO CORRECT FOR DATA ENCODING HERE
With missing data properly encoded, we now visualise the missing data in a number of different ways.
We first examine a simple univariate count of all the missing data:
# Total row count, used to turn missing counts into proportions.
row_count <- nrow(data_tbl)

# One row per variable: count and proportion of missing entries.
missing_univariate_tbl <- data_tbl %>%
  summarise_all(funs(sum(is.na(.)))) %>%
  gather('variable', 'missing_count') %>%
  mutate(missing_prop = missing_count / row_count)

# Barplot of missing-value proportions, most-missing variables first.
ggplot(missing_univariate_tbl) +
  geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
  scale_y_continuous(labels = comma) +
  labs(x = "Variable", y = "Missing Value Proportion") +
  theme(axis.text.x = element_text(angle = 90))
We remove all variables where all of the entries are missing
# Variables where every entry is missing carry no information: drop them.
remove_vars <- missing_univariate_tbl %>%
  filter(missing_count == row_count) %>%
  pull(variable)

lessmiss_data_tbl <- data_tbl %>%
  dplyr::select(-one_of(remove_vars))
With these columns removed, we repeat the exercise.
# Recompute the per-variable missing counts on the reduced dataset.
missing_univariate_tbl <- lessmiss_data_tbl %>%
  summarise_all(funs(sum(is.na(.)))) %>%
  gather('variable', 'missing_count') %>%
  mutate(missing_prop = missing_count / row_count)

# Same missingness barplot, now without the all-missing columns.
ggplot(missing_univariate_tbl) +
  geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
  scale_y_continuous(labels = comma) +
  labs(x = "Variable", y = "Missing Value Proportion") +
  theme(axis.text.x = element_text(angle = 90))
To reduce the scale of this plot, we look only at the variables with the largest missing data counts.
# Keep only the variables with the largest missing-value counts.
missing_univariate_top_tbl <- missing_univariate_tbl %>%
  arrange(desc(missing_count)) %>%
  top_n(n = 50, wt = missing_count)

ggplot(missing_univariate_top_tbl) +
  geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
  scale_y_continuous(labels = comma) +
  labs(x = "Variable", y = "Missing Value Proportion") +
  theme(axis.text.x = element_text(angle = 90))
It is useful to get an idea of what combinations of variables tend to have variables with missing values simultaneously, so to construct a visualisation for this we create a count of all the times given combinations of variables have missing values, producing a heat map for these combination counts.
# Heat map of which variables tend to be missing *together*.
# NOTE(review): this pipeline reads rawdata_tbl (pre name-cleaning) while the
# rest of the exploration uses data_tbl - same rows, but confirm the column
# names line up with later references.
missing_plot_tbl <- rawdata_tbl %>%
# Turn every cell into a 0/1 missingness indicator.
mutate_all(funs(is.na)) %>%
mutate_all(funs(as.numeric)) %>%
# Concatenate the 0/1 flags into a per-row "signature": rows sharing a
# signature have exactly the same combination of missing variables.
mutate(label = do.call(paste0, (.))) %>%
group_by(label) %>%
# Within each signature group, sum the indicators per variable.
summarise_all(funs(sum)) %>%
arrange(desc(label)) %>%
dplyr::select(-label) %>%
# rowid recovers the group size: within a group each variable's summed
# indicator is either 0 or the full group size, so the row-wise max is the
# group size (0 for the no-missing-values group).
mutate(rowid = do.call(pmax, (.))) %>%
gather('col','count', -rowid) %>%
# Express both the cell counts and the group sizes as proportions of rows.
mutate(Proportion = count / row_count
,rowid = round(rowid / row_count, 4)
)
# One tile row per missingness combination; fill shows how common it is.
ggplot(missing_plot_tbl) +
geom_tile(aes(x = col, y = as.factor(rowid), fill = Proportion), height = 0.8) +
scale_fill_continuous(labels = comma) +
scale_x_discrete(position = 'top') +
xlab("Variable") +
ylab("Missing Value Proportion") +
theme(axis.text.x = element_text(angle = 90))
This visualisation takes a little explaining.
Each row represents a combination of variables with simultaneous missing values. For each row in the graphic, the coloured entries show which particular variables are missing in that combination. The proportion of rows with that combination is displayed in both the label for the row and the colouring for the cells in the row.
With the raw data loaded up we now remove obvious unique or near-unique variables that are not amenable to basic exploration and plotting.
# Classify every column as continuous / discrete / datetime / logical.
coltype_lst <- create_coltype_list(data_tbl)

if (is.null(coltype_lst$split$discrete)) {
  cat('No categorical variables in dataset\n')
} else {
  # Distinct-level count for each categorical variable, largest first.
  catvar_valuecount_tbl <- data_tbl %>%
    summarise_at(coltype_lst$split$discrete
                ,function(x) length(unique(x))) %>%
    gather('var_name', 'level_count') %>%
    arrange(desc(level_count))

  print(catvar_valuecount_tbl)

  row_count <- nrow(data_tbl)
  cat(paste0("Dataset has ", row_count, " rows\n"))
}
## # A tibble: 12 x 2
## var_name level_count
## <chr> <int>
## 1 policy_id 1500000
## 2 sa_id 18481
## 3 edname 3109
## 4 countyname 34
## 5 nuts3name 8
## 6 cluster_id 6
## 7 prod_type 4
## 8 mortgage_status 4
## 9 smoker_life1 3
## 10 policy_status 3
## 11 prem_type 2
## 12 gender_life1 2
## Dataset has 1500000 rows
Now that we have a table of the counts of all the categorical variables, we can automatically exclude unique variables from the exploration, as the level count will match the row count.
if (is.null(coltype_lst$split$discrete)) {
  explore_data_tbl <- data_tbl
} else {
  # Variables whose level count equals the row count are unique identifiers
  # and are excluded from the exploration set.
  unique_vars <- catvar_valuecount_tbl %>%
    filter(level_count == row_count) %>%
    pull(var_name)

  print(unique_vars)

  explore_data_tbl <- data_tbl %>%
    dplyr::select(-one_of(unique_vars))
}
## [1] "policy_id"
Having removed the unique identifier variables from the dataset, we may also wish to exclude categoricals with high level counts also, so we create a vector of those variable names.
if (is.null(coltype_lst$split$discrete)) {
  highcount_vars <- c()
} else {
  # Flag categoricals that have many levels (at or above the exclusion
  # threshold) without being fully unique - awkward to plot directly.
  highcount_vars <- catvar_valuecount_tbl %>%
    filter(level_count >= dataexp_level_exclusion_threshold
          ,level_count < row_count) %>%
    pull(var_name)

  cat(paste0(highcount_vars, collapse = ', '))
}
## sa_id, edname
We now can continue doing some basic exploration of the data. We may also choose to remove some extra columns from the dataset.
### You may want to comment out these next few lines to customise which
### categoricals are kept in the exploration.
# Variables to remove from the exploration set (edit to customise).
drop_vars <- c(highcount_vars)

if (length(drop_vars) > 0) {
  explore_data_tbl <- explore_data_tbl %>%
    dplyr::select(-one_of(drop_vars))

  cat(paste0(drop_vars, collapse = ', '))
}
## sa_id, edname
# Persist the cleaned dataset for downstream use (CSV variant kept for reference).
# write_csv(data_tbl, path = 'data/lifebook_cleaned.csv')
write_feather(data_tbl, path = 'data/lifebook_cleaned.feather')
Now that we have loaded the data we can prepare it for some basic data exploration. We first exclude the variables that are unique identifiers or similar, and then split the remaining variables out into various categories to help with the systematic data exploration.
# Re-derive the column-type classification on the reduced exploration dataset.
coltype_lst <- create_coltype_list(explore_data_tbl)
coltype_lst %>% print
## $split
## $split$continuous
## [1] "prem_freq" "prem_ape" "prem_risk" "policy_duration"
## [5] "mort_rating" "sum_assured"
##
## $split$datetime
## [1] "policy_startdate" "policy_enddate" "dob_life1"
## [4] "policy_statuschangedate"
##
## $split$discrete
## [1] "countyname" "nuts3name" "cluster_id" "prod_type"
## [5] "prem_type" "gender_life1" "smoker_life1" "mortgage_status"
## [9] "policy_status"
##
## $split$logical
## [1] "isjointlife" "islifeonly" "lapsed"
##
##
## $columns
## countyname nuts3name cluster_id
## "discrete" "discrete" "discrete"
## prod_type prem_type prem_freq
## "discrete" "discrete" "continuous"
## prem_ape prem_risk policy_startdate
## "continuous" "continuous" "datetime"
## policy_enddate policy_duration mort_rating
## "datetime" "continuous" "continuous"
## sum_assured dob_life1 gender_life1
## "continuous" "datetime" "discrete"
## smoker_life1 isjointlife islifeonly
## "discrete" "logical" "logical"
## mortgage_status policy_status policy_statuschangedate
## "discrete" "discrete" "datetime"
## lapsed
## "logical"
Logical variables only take two values: TRUE or FALSE. It is useful to see missing data as well though, so we also plot the count of those.
logical_vars <- coltype_lst$split$logical

# One barplot per logical variable; missing-value count shown in the title.
for (plot_varname in logical_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  na_count <- sum(is.na(explore_data_tbl[[plot_varname]]))

  logical_plot <- ggplot(explore_data_tbl) +
    geom_bar(aes_string(x = plot_varname)) +
    scale_y_continuous(labels = comma) +
    labs(x = plot_varname, y = "Count") +
    ggtitle(paste0('Barplot of Counts for Variable: ', plot_varname
                  ,' (', na_count, ' missing values)')) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  print(logical_plot)
}
## --
## isjointlife
## --
## islifeonly
## --
## lapsed
Numeric variables are usually continuous in nature, though we also have integer data.
numeric_vars <- coltype_lst$split$continuous

# Histogram for each numeric variable; mean (red) and median (green) marked.
for (plot_varname in numeric_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  plot_var <- explore_data_tbl[[plot_varname]]
  na_count <- sum(is.na(plot_var))

  print(summary(plot_var))

  numeric_plot <- ggplot(explore_data_tbl) +
    geom_histogram(aes_string(x = plot_varname), bins = dataexp_hist_bins_count) +
    geom_vline(xintercept = mean(plot_var, na.rm = TRUE), colour = 'red', size = 1.5) +
    geom_vline(xintercept = median(plot_var, na.rm = TRUE), colour = 'green', size = 1.5) +
    scale_x_continuous(labels = comma) +
    scale_y_continuous(labels = comma) +
    labs(x = plot_varname, y = "Count") +
    ggtitle(paste0('Histogram Plot for Variable: ', plot_varname
                  ,' (', na_count, ' missing values)')
           ,subtitle = '(red line is mean, green line is median)')

  print(numeric_plot)
}
## --
## prem_freq
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## 1 12 12 11 12 12 233999
## --
## prem_ape
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 36 896 2052 4983 4491 957600
## --
## prem_risk
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## 26 607 1350 3481 3152 684000 727695
## --
## policy_duration
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## 5 10 20 17 20 35 475798
## --
## mort_rating
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## 100 100 150 142 200 300 727695
## --
## sum_assured
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## 100000 200000 300000 435045 450000 5000000 727695
Categorical variables only have values from a limited, and usually fixed, number of possible values
categorical_vars <- coltype_lst$split$discrete

# Barplot of level counts per categorical, lumping rare levels into "Other".
for (plot_varname in categorical_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  var_values <- explore_data_tbl[[plot_varname]]
  na_count <- sum(is.na(var_values))

  # Lump to at most dataexp_cat_level_count levels, then count each level.
  plot_tbl <- var_values %>%
    as.character %>%
    fct_lump(n = dataexp_cat_level_count) %>%
    fct_count

  categorical_plot <- ggplot(plot_tbl) +
    geom_bar(aes(x = fct_reorder(f, -n), weight = n)) +
    scale_y_continuous(labels = comma) +
    labs(x = plot_varname, y = "Count") +
    ggtitle(paste0('Barplot of Counts for Variable: ', plot_varname
                  ,' (', na_count, ' missing values)')) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  print(categorical_plot)
}
## --
## countyname
## --
## nuts3name
## --
## cluster_id
## --
## prod_type
## --
## prem_type
## --
## gender_life1
## --
## smoker_life1
## --
## mortgage_status
## --
## policy_status
Date/Time variables represent calendar or time-based data such as the time of day, a date, or a timestamp.
datetime_vars <- coltype_lst$split$datetime

# Histogram of each date/time variable, with missing-value count in the title.
for (plot_varname in datetime_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  plot_var <- explore_data_tbl[[plot_varname]]
  na_count <- sum(is.na(plot_var))

  print(summary(plot_var))

  datetime_plot <- ggplot(explore_data_tbl) +
    geom_histogram(aes_string(x = plot_varname), bins = dataexp_hist_bins_count) +
    scale_y_continuous(labels = comma) +
    labs(x = plot_varname, y = "Count") +
    ggtitle(paste0('Barplot of Dates/Times in Variable: ', plot_varname
                  ,' (', na_count, ' missing values)'))

  print(datetime_plot)
}
## --
## policy_startdate
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## "1990-01-02" "1999-02-23" "2002-07-23" "2002-10-05" "2007-02-20" "2015-12-31"
## --
## policy_enddate
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## "1995-01-04" "2016-07-26" "2025-01-22" "2038-06-20" "2067-05-06" "2117-11-13"
## --
## dob_life1
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## "1899-08-10" "1953-03-15" "1961-08-09" "1960-11-30" "1969-08-30" "1999-10-17"
## --
## policy_statuschangedate
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## "1990-04-20" "2004-09-01" "2008-06-01" "2007-12-21" "2011-12-17" "2015-12-31"
We now move on to looking at bivariate plots of the data set.
Pairs plots are a very useful way of getting a quick idea of the relationships between variables in a data set.
Unfortunately, they do not scale well. Too many rows (say more than 5,000) can slow down the rendering, and more than 10 columns can make the plots uninterpretable as each cell is too small.
The technique is useful, so to circumvent these issues we sample the dataset. We select random columns and rows, and make a pairs plot of the subset, repeating this process for a number of iterations.
# Pairs plots do not scale to a dataset this size, so we repeatedly sample a
# random subset of rows and columns and draw a pairs plot of each subset.
dataexp_pairsplot_itercount <- 10
dataexp_pairsplot_colcount <- 7
dataexp_pairsplot_rowcount <- 5000
# NOTE(review): the size check reads data_tbl but the sampling below uses
# explore_data_tbl - confirm this asymmetry is intended.
if(ncol(data_tbl) > dataexp_pairsplot_colcount ||
nrow(data_tbl) > dataexp_pairsplot_rowcount) {
### Ugly hack to work around current dplyr bug for mutate_if
# Convert logical columns to factors so ggpairs treats them as discrete.
if(any(sapply(explore_data_tbl, is.logical))) {
conv_tbl <- explore_data_tbl %>%
mutate_if(is.logical, as.factor)
} else {
conv_tbl <- explore_data_tbl
}
# Lump character/factor columns to at most 9 levels plus "Other"; all-NA
# columns are skipped because fct_lump cannot handle them.
conv_tbl <- conv_tbl %>%
mutate_if(function(x) (is.character(x) || is.factor(x)) && !all(is.na(x))
,function(x) fct_lump(x, n = 9))
# Draw one pairs plot per iteration over a fresh random row/column sample.
for(i in 1:dataexp_pairsplot_itercount) {
cat("--\n")
cat(paste0("Pairs plot iter: ", i, "\n"))
pairs_tbl <- conv_tbl %>%
create_ggpairs_tbl(sample_cols = dataexp_pairsplot_colcount
,sample_rows = dataexp_pairsplot_rowcount
)
cat(paste0("Columns: ", paste0(names(pairs_tbl), collapse = ', '), "\n"))
pairs_tbl %>%
ggpairs(cardinality_threshold = NULL
,lower = list(combo = wrap('facethist', bins = 25))
) %>%
print
}
} else {
# Dataset is small enough to plot directly.
ggpairs(data_tbl) %>% print
}
## --
## Pairs plot iter: 1
## Columns: prem_freq, policy_startdate, mort_rating, smoker_life1, islifeonly, policy_status, policy_statuschangedate
## --
## Pairs plot iter: 2
## Columns: countyname, cluster_id, policy_enddate, mort_rating, smoker_life1, policy_status, lapsed
## --
## Pairs plot iter: 3
## Columns: countyname, cluster_id, prem_freq, prem_ape, prem_risk, sum_assured, islifeonly
## --
## Pairs plot iter: 4
## Columns: countyname, prem_ape, policy_startdate, mort_rating, sum_assured, smoker_life1, lapsed
## --
## Pairs plot iter: 5
## Columns: countyname, nuts3name, policy_startdate, policy_enddate, mort_rating, dob_life1, isjointlife
## --
## Pairs plot iter: 6
## Columns: nuts3name, cluster_id, policy_startdate, policy_duration, mort_rating, smoker_life1, policy_statuschangedate
## --
## Pairs plot iter: 7
## Columns: nuts3name, prod_type, prem_freq, policy_duration, dob_life1, isjointlife, islifeonly
## --
## Pairs plot iter: 8
## Columns: cluster_id, prod_type, prem_ape, policy_duration, mort_rating, dob_life1, smoker_life1
## --
## Pairs plot iter: 9
## Columns: nuts3name, prod_type, prem_ape, policy_startdate, policy_duration, mortgage_status, policy_statuschangedate
## --
## Pairs plot iter: 10
## Columns: cluster_id, prem_freq, prem_ape, policy_startdate, policy_enddate, islifeonly, policy_status
We want to look at how the variables split on the logical variables as this is a very natural way to observe the data.
# Variable on which all the bivariate plots are faceted, plus a facet cap.
facet_varname <- 'prod_type'
facet_count_max <- 3

# Formula of the form ~ as.factor(prod_type), for use with facet_wrap().
facet_formula <- formula(sprintf("~ as.factor(%s)", facet_varname))
For logical variables we facet on barplots of the levels, comparing TRUE, FALSE and missing data.
# Faceted barplots of each logical variable, split by the faceting variable.
# The faceting variable itself is excluded from the plotted set. Rows where
# the plotted variable is missing are filtered out.
# (Removed an unused facet_count computation that was never referenced.)
logical_vars <- logical_vars[!logical_vars %in% facet_varname]

for(plot_varname in logical_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  # Keep only rows where the plotted variable is present.
  filter_formula <- formula(paste0("~ !is.na(", plot_varname, ")"))
  plot_tbl <- data_tbl %>% filter_(filter_formula)

  explore_plot <- ggplot(plot_tbl) +
    geom_bar(aes_string(x = plot_varname)) +
    facet_wrap(facet_formula, scales = 'free') +
    xlab(plot_varname) +
    ylab("Count") +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0(facet_varname, '-Faceted Barplots for Variable: ', plot_varname)) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  plot(explore_plot)
}
## --
## isjointlife
## --
## islifeonly
## --
## lapsed
For numeric variables, we facet on histograms of the data.
# Faceted histograms of each numeric variable, split by the faceting
# variable. Rows where the plotted variable is missing are filtered out.
# (Removed an unused facet_count computation that was never referenced.)
for(plot_varname in numeric_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  # Keep only rows where the plotted variable is present.
  filter_formula <- formula(paste0("~ !is.na(", plot_varname, ")"))
  plot_tbl <- data_tbl %>% filter_(filter_formula)

  explore_plot <- ggplot(plot_tbl) +
    geom_histogram(aes_string(x = plot_varname), bins = dataexp_hist_bins_count) +
    facet_wrap(facet_formula, scales = 'free') +
    xlab(plot_varname) +
    ylab("Count") +
    scale_x_continuous(labels = comma) +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0(facet_varname, '-Faceted Histogram for Variable: ', plot_varname)) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  print(explore_plot)
}
## --
## prem_freq
## --
## prem_ape
## --
## prem_risk
## --
## policy_duration
## --
## mort_rating
## --
## sum_assured
We treat categorical variables like logical variables, faceting the barplots of the different levels of the data.
# Faceted barplots of each categorical variable, split by the faceting
# variable (which is itself excluded from the plotted set).
# (Removed an unused facet_count computation that was never referenced, and
# retitled the plot "Barplots" - it is a barplot, not a histogram - for
# consistency with the logical-variable plots.)
categorical_vars <- categorical_vars[!categorical_vars %in% facet_varname]

for(plot_varname in categorical_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  # Keep only rows where the plotted variable is present.
  filter_formula <- formula(paste0("~ !is.na(", plot_varname, ")"))
  plot_tbl <- data_tbl %>% filter_(filter_formula)

  explore_plot <- ggplot(plot_tbl) +
    geom_bar(aes_string(x = plot_varname)) +
    facet_wrap(facet_formula, scales = 'free') +
    xlab(plot_varname) +
    ylab("Count") +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0(facet_varname, '-Faceted Barplots for Variable: ', plot_varname)) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  plot(explore_plot)
}
## --
## countyname
## --
## nuts3name
## --
## cluster_id
## --
## prem_type
## --
## gender_life1
## --
## smoker_life1
## --
## mortgage_status
## --
## policy_status
Like the univariate plots, we facet on histograms of the years in the dates.
# Faceted barplots of the year component of each date variable, split by the
# faceting variable. Rows with a missing date are filtered out.
# (Removed an unused facet_count computation that was never referenced.)
for(plot_varname in datetime_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  filter_formula <- formula(paste0("~ !is.na(", plot_varname, ")"))

  # Extract the year of the date variable into plot_var for plotting.
  plot_tbl <- data_tbl %>%
    filter_(filter_formula) %>%
    mutate_(plot_vartmp = plot_varname) %>%
    mutate(plot_var = year(plot_vartmp))

  explore_plot <- ggplot(plot_tbl) +
    geom_bar(aes(x = plot_var)) +
    facet_wrap(facet_formula, scales = 'free') +
    xlab(plot_varname) +
    ylab("Count") +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0(facet_varname, '-Faceted Histogram for Variable: ', plot_varname))

  plot(explore_plot)
}
## --
## policy_startdate
## --
## policy_enddate
## --
## dob_life1
## --
## policy_statuschangedate